Данные представляют собой аннотированные слова, записанные от 6 носителей в селении Красный Восток:
ls raw_data
## d23_stress.TextGrid
## d23_stress.wav
## d25_stress.TextGrid
## d25_stress.wav
## d26_stress.TextGrid
## d26_stress.wav
## d27_stress.TextGrid
## d27_stress.wav
## d28_stress.TextGrid
## d28_stress.wav
## d30_stress.TextGrid
## d30_stress.wav
От каждого носителя было записано от 74 до 79 стимульных слов:
library(phonfieldwork)

# Read every TextGrid annotation in raw_data into a single long tibble
# (one row per annotated interval; `source` = file name, `tier`, `content`).
textgrids <- read_from_folder("raw_data", "textgrid")

# Tier 1 carries the word labels: count distinct stimulus words per recording
# (per speaker), expected to be between 74 and 79.
textgrids %>%
filter(tier == 1) %>%
distinct(source, content) %>%
count(source)
Дальше следует описание данных, см. Appendix
# Oscillogram and spectrogram of the first speaker's recording with the
# TextGrid annotation overlaid; zoom in on the 3.8–4.4 s fragment.
draw_sound(
  file_name  = "raw_data/d23_stress.wav",
  annotation = "raw_data/d23_stress.TextGrid",
  zoom = c(3.8, 4.4),
  from = 0,
  to = 10.6
)
После этого мы применяем к этим данным вот этот скрипт:
cat get_pitch_intencity.praat
## # This is a Praat script made for investigation of Abaza vowels. It analyses multiple selected sounds
## # (TextGrids should be also uploaded to Praat Obects). The file should have the following structure:
## # * first tier --- word label
## # * second tier --- translation label
## # * third tier --- sound label
## # * fourth tier --- utterance label
##
##
## # This script is distributed under the GNU General Public License.
## # George Moroz 09.05.2022
##
## form Get Pitch listing from a file
## comment Where should the script write a result file
## text directory /home/agricolamz/for_work/HSE/students/2022_Kuznetsova/data/
## comment How should the script name a result file
## text resultfile log.txt
## comment Time step
## real step 0.01
## comment Pitch floor (Hz)
## integer floor 90
## comment Pitch ceiling (Hz)
## integer ceiling 250
## comment 5. formant ceiling (Hz)
## integer fceiling 5500
## comment Minimum pitch for intensity (Hz)
## integer mpitch 200
## endform
##
## n = numberOfSelected("Sound")
## for j to n
## sound[j] = selected("Sound", j)
## endfor
## for k to n
## selectObject: sound[k]
## object_name$ = selected$ ("Sound")
## select TextGrid 'object_name$'
## number_of_intervals = Get number of intervals... 3
## for b from 1 to number_of_intervals
## select TextGrid 'object_name$'
## interval_label$ = Get label of interval... 3 'b'
## utterance$ = Get label of interval... 4 'b'
## if interval_label$ <> ""
## start = Get starting point... 3 'b'
## end = Get end point... 3 'b'
## duration = end - start
## int_1 = Get interval at time... 1 end
## word$ = Get label of interval... 1 int_1
## trans$ = Get label of interval... 2 int_1
## select Sound 'object_name$'
## s = Extract part: start, end, "rectangular", 1, "yes"
## select s
## fragment_name$ = selected$ ("Sound")
## pitch = To Pitch... step floor ceiling
## selectObject: s
## formant = To Formant (burg): 0, 5, fceiling, 0.025, 50
## selectObject: s
## intensity = To Intensity: mpitch, 0, "no"
## i = start
## while i <= end
## select Pitch 'fragment_name$'
## f0 = Get value at time... 'i' Hertz Linear
## select Formant 'fragment_name$'
## f1 = Get value at time: 1, i, "Hertz", "Linear"
## f2 = Get value at time: 2, i, "Hertz", "Linear"
## f3 = Get value at time: 3, i, "Hertz", "Linear"
## select Intensity 'fragment_name$'
## intvalue = Get value at time: 'i', "cubic"
## i = i + 0.01
## fileappend "'directory$''resultfile$'" 'object_name$''tab$''interval_label$''tab$''utterance$''tab$''word$''tab$''trans$''tab$''f0''tab$''f1''tab$''f2''tab$''f3''tab$''intvalue''tab$''duration''tab$''i''newline$'
## endwhile
## removeObject: s
## removeObject: pitch
## removeObject: formant
## removeObject: intensity
## endif
## endfor
## # removeObject: "Sound 'object_name$'"
## # removeObject: "TextGrid 'object_name$'"
## endfor
Скрипт идет по аннотации с шагом в 10 мс (в скрипте i = i + 0.01 с) и с базовыми настройками,
отраженными в меню (между form и endform),
берет значения длительности, f0, f1, f2, f3 и интенсивности. Получается
вот такая таблица:
# Read the Praat log produced by get_pitch_intencity.praat. Supplying a
# character vector to `col_names` tells read_tsv that the file has no header
# row and names the columns in one step (instead of colnames<- afterwards).
df <- read_tsv(
  "data/log.txt",
  col_names = c("speaker", "vowel", "utterance", "word", "translation",
                "f0", "f1", "f2", "f3", "intensity", "duration", "step")
)
df
Дальше с данными можно делать разное, я просто возьму среднее по всем параметрам и создам переменную minimal_pair, которая будет содержать минимальную пару:
# Build per-vowel mean acoustic values and attach each token's minimal pair.
#
# Steps:
#  * Praat writes "--undefined--" where a measure could not be computed;
#    turn those into NA in every column, then restore numeric types.
#  * Upper-case vowel letters in the annotation mark the stressed vowel.
#  * Syllable number ("V<d>") and repetition number ("u<d>") are encoded
#    inside the utterance label.
df %>%
  # mutate_all() is superseded; across(everything(), ...) is the modern form
  mutate(across(everything(),
                function(i){str_replace(i, "--undefined--", NA_character_)})) %>%
  mutate(across(f0:duration, as.double),
         stressed = ifelse(vowel == toupper(vowel), "stressed", "unstressed"),
         stressed = factor(stressed, levels = c("unstressed", "stressed")),
         vowel_n = str_extract(utterance, "V\\d"),
         vowel_n = as.double(str_remove(vowel_n, "V")),
         utterance = str_extract(utterance, "u\\d"),
         utterance = as.double(str_remove(utterance, "u"))) %>%
  # keep only the vowel [а]/[А] and at most four repetitions per word
  filter(str_detect(vowel, "[аА]"),
         utterance <= 4) %>%
  select(-step) %>%
  # average every acoustic measure within one vowel token
  group_by(speaker, utterance, stressed, word, vowel_n) %>%
  summarise(across(c(f0, f1, f2, f3, intensity, duration),
                   function(x){mean(x, na.rm = TRUE)})) %>%
  mutate(vowel_n = str_c(vowel_n, ". syllable"),
         utterance = str_c(utterance, ". utterance"),
         duration = duration*1000) %>%  # seconds -> milliseconds
  ungroup() %>%
  # words differing only in capitalisation (the stress mark) form a pair
  group_by(speaker) %>%
  mutate(word_pair = as.double(factor(tolower(word)))) %>%
  # NOTE(review): word_pair ids are assigned per speaker, but the grouping
  # below is by word_pair alone; since speakers read 74-79 (not identical)
  # word lists, the same id may point to different words across speakers --
  # confirm this grouping is intended.
  group_by(word_pair) %>%
  mutate(minimal_pair = str_c(unique(str_c(word, "_")), collapse = ""),
         minimal_pair = str_remove(minimal_pair, "_$")) %>%
  ungroup() ->
  mean_values
mean_values
График с разницами между минимальными парами (ударный слог минус безударный). Если разницы нет, то горб должен располагаться вокруг 0. Если же он смещен от нуля, значит, разница между ударными и безударными слогами есть.
# Within-pair duration difference (stressed minus unstressed vowel).
# If stress has no durational correlate, the density should centre on 0.
mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, duration) %>%
  pivot_wider(names_from = stressed, values_from = duration) %>%
  mutate(duration_difference = stressed-unstressed) %>%  # fixed typo: "differance"
  ggplot(aes(duration_difference, fill = vowel_n))+
  geom_density(alpha = 0.4)+
  facet_grid(speaker~utterance, scales = "free")
# Within-pair f0 difference (stressed minus unstressed vowel).
mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, f0) %>%
  pivot_wider(names_from = stressed, values_from = f0) %>%
  mutate(f0_difference = stressed-unstressed) %>%  # fixed typo: "differance"
  ggplot(aes(f0_difference, fill = vowel_n))+
  geom_density(alpha = 0.4)+
  facet_grid(speaker~utterance, scales = "free")
# Within-pair intensity difference (stressed minus unstressed vowel).
mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, intensity) %>%
  pivot_wider(names_from = stressed, values_from = intensity) %>%
  mutate(intensity_difference = stressed-unstressed) %>%  # fixed typo: "differance"
  ggplot(aes(intensity_difference, fill = vowel_n))+
  geom_density(alpha = 0.4)+
  facet_grid(speaker~utterance, scales = "free")
Я использую байесовскую логистическую регрессию со смешанными эффектами с априорными распределениями по умолчанию, и формула выглядит вот так:
stressed ~ ПЕРЕМЕННАЯ * vowel_n + (1|speaker) + (1|minimal_pair/utterance)
Получается взаимодействие переменной и номера гласного в слове с носителем в смешанных эффектах и номером произнесения вложенным в минимальную пару в другом смешанном эффекте.
library(brms)

# Bayesian mixed-effects logistic regression (default brms priors):
# does vowel duration, interacting with syllable number, predict stress?
# NOTE(review): the random term (vowel_n+1|minimal_pair/utterance) includes
# a vowel_n slope, which is richer than the (1|minimal_pair/utterance)
# given in the prose formula above -- confirm which is intended.
mean_values %>%
select(speaker, utterance, stressed, minimal_pair, vowel_n, duration) %>%
brm(stressed ~ duration*vowel_n + (1|speaker) + (vowel_n+1|minimal_pair/utterance),
family = bernoulli(),
data = .) ->
fit_duration
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000846 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 8.46 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 101.938 seconds (Warm-up)
## Chain 1: 38.3152 seconds (Sampling)
## Chain 1: 140.253 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0.000761 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 7.61 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 99.882 seconds (Warm-up)
## Chain 2: 36.5555 seconds (Sampling)
## Chain 2: 136.438 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0.000859 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 8.59 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 101.186 seconds (Warm-up)
## Chain 3: 38.7163 seconds (Sampling)
## Chain 3: 139.902 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0.001398 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 13.98 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 99.9226 seconds (Warm-up)
## Chain 4: 69.3459 seconds (Sampling)
## Chain 4: 169.268 seconds (Total)
## Chain 4:
# Posterior conditional effect of duration on stress, split by syllable number.
conditional_effects(fit_duration, effects = "duration:vowel_n")
# Same model specification as fit_duration, with f0 as the predictor.
mean_values %>%
select(speaker, utterance, stressed, minimal_pair, vowel_n, f0) %>%
brm(stressed ~ f0*vowel_n + (1|speaker) + (vowel_n+1|minimal_pair/utterance),
family = bernoulli(),
data = .) ->
fit_f0
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000543 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 5.43 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 104.906 seconds (Warm-up)
## Chain 1: 38.7913 seconds (Sampling)
## Chain 1: 143.697 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0.000653 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 6.53 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 98.0157 seconds (Warm-up)
## Chain 2: 38.9235 seconds (Sampling)
## Chain 2: 136.939 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0.001099 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 10.99 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 105.343 seconds (Warm-up)
## Chain 3: 49.0111 seconds (Sampling)
## Chain 3: 154.354 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0.000679 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 6.79 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 118.362 seconds (Warm-up)
## Chain 4: 46.8777 seconds (Sampling)
## Chain 4: 165.24 seconds (Total)
## Chain 4:
# Posterior conditional effect of f0 on stress, split by syllable number.
conditional_effects(fit_f0, effects = "f0:vowel_n")
# Same model specification as fit_duration, with intensity as the predictor.
mean_values %>%
select(speaker, utterance, stressed, minimal_pair, vowel_n, intensity) %>%
brm(stressed ~ intensity*vowel_n + (1|speaker) + (vowel_n+1|minimal_pair/utterance),
family = bernoulli(),
data = .) ->
fit_intensity
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000907 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 9.07 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 156.623 seconds (Warm-up)
## Chain 1: 158.909 seconds (Sampling)
## Chain 1: 315.532 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0.000701 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 7.01 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 156.422 seconds (Warm-up)
## Chain 2: 163.578 seconds (Sampling)
## Chain 2: 319.999 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0.000944 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 9.44 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 166.644 seconds (Warm-up)
## Chain 3: 155.57 seconds (Sampling)
## Chain 3: 322.214 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0.000578 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 5.78 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 153.336 seconds (Warm-up)
## Chain 4: 158.045 seconds (Sampling)
## Chain 4: 311.381 seconds (Total)
## Chain 4:
# Posterior conditional effect of intensity on stress, split by syllable number.
conditional_effects(fit_intensity, effects = "intensity:vowel_n")
# Appendix: the stimulus word list per speaker, with the speaker code
# shortened to its "dNN" identifier.
textgrids %>%
  filter(tier == 1) %>%
  distinct(content, source) %>%
  select(word = content, speaker = source) %>%
  mutate(speaker = str_extract(speaker, "d\\d{2}"))
Нужно в IPA перевести… https://github.com/agricolamz/abaza_cyrillic_to_trans